In [1]:
import numpy as np
# Define a softmax function
def softmax(x):
    p = np.exp(x) / np.sum(np.exp(x), axis=0)
    return p
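# A numerically stable variant (a sketch; softmax_stable is a name
# introduced here, not part of the original): subtracting the column max
# before exponentiating avoids overflow for large inputs and leaves the
# result unchanged, since softmax is shift-invariant
def softmax_stable(x):
    z = x - np.max(x, axis=0)
    return np.exp(z) / np.sum(np.exp(z), axis=0)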
# Define a linear classifier
def linearClassifier(x, W, b):
    # np.dot() computes the matrix product of the
    # weight matrix and the feature vector
    return softmax(np.dot(W, x) + b)
# Test softmax() out on a vector
test = [1, 3, 2]
print(softmax(test))
# Again on a matrix
test = np.ones((3, 4))
test[0, :] = 2.0
print(test)
print(softmax(test))
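# Sanity check (illustrative addition): with axis=0, softmax normalizes
# each column, so every column of the result should sum to 1
print(softmax(test).sum(axis=0))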
# Test linearClassifier out on an array
W = np.ones((3, 4))
W[0, :] = 2.0
x = np.asarray([1, 0, 1, 0])
b = np.asarray([1, 1, 2])
print(x)
print(W)
print(b)
print(linearClassifier(x,W,b))
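# Illustrative check (p is a throwaway name introduced here): the output
# is a probability distribution over the three classes, so it sums to 1
# and np.argmax() picks the predicted class
p = linearClassifier(x, W, b)
print(p.sum(), np.argmax(p))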
# Define a class version of the classifier
# ---------------------------
# CapWords for the class name avoids shadowing the linearClassifier()
# function defined above
class LinearClassifier:
    def __init__(self, W, b):
        self.W = W
        self.b = b

    def predict(self, x):
        return softmax(np.dot(self.W, x) + self.b)
myLC = LinearClassifier(W, b)
print(myLC.predict(x))
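# Batch-scoring sketch (an illustrative assumption; X_batch is a name
# introduced here): predict() as written expects a single feature vector,
# since b only broadcasts against a 1-D result. Stacking inputs as columns
# and adding b as a column vector scores several inputs at once, and
# softmax with axis=0 then normalizes each column independently
X_batch = np.stack([x, np.ones(4)], axis=1)  # shape (4, 2): two feature vectors
print(softmax(np.dot(W, X_batch) + b[:, None]))  # shape (3, 2): one column per input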